Enable VHPT support for multiple page sizes: guest mappings whose page size is at least the host VHPT page size are inserted into the VHPT, while smaller mappings are installed directly into the TLB via itc. VHPT entries now carry their own itir (tracked through collision-chain swaps and cleared on init) instead of assuming a fixed PAGE_SHIFT page size, and physical-mode TLB misses pass the miss type (I-side/D-side) down to the insert path.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
return IA64_RETRY;
}
- mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);
maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
(gip & (PSIZE(tlb->ps) - 1));
+ mfn = maddr >> PAGE_SHIFT;
}
page = mfn_to_page(mfn);
thash_data_t *data;
ISR visr,pt_isr;
REGS *regs;
- u64 vhpt_adr;
+ u64 vhpt_adr, madr;
IA64_PSR vpsr;
regs=vcpu_regs(vcpu);
pt_isr.val=VMX(vcpu,cr_isr);
dnat_page_consumption(vcpu, vadr);
return IA64_FAULT;
}else{
- *padr = (get_gpfn_from_mfn(arch_to_xen_ppn(data->ppn)) << PAGE_SHIFT) | (vadr & (PAGE_SIZE - 1));
+ madr = (data->ppn >> (data->ps - 12) << data->ps) |
+ (vadr & (PSIZE(data->ps) - 1));
+ *padr = __mpa_to_gpa(madr);
return IA64_NO_FAULT;
}
}
ld8 r27 = [r18]
ld8 r29 = [r28]
;;
- st8 [r16] = r29
- st8 [r28] = r22
+ st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
+ st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
extr.u r19 = r27, 56, 4
;;
+ ld8 r29 = [r16]
+ ld8 r22 = [r28]
dep r27 = r0, r27, 56, 4
dep r25 = r19, r25, 56, 4
;;
+ st8 [r16] = r22
+ st8 [r28] = r29
st8 [r18] = r25
st8 [r17] = r27
;;
ld8 r27 = [r18]
ld8 r29 = [r28]
;;
- st8 [r16] = r29
- st8 [r28] = r22
+ st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
+ st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
extr.u r19 = r27, 56, 4
;;
+ ld8 r29 = [r16]
+ ld8 r22 = [r28]
dep r27 = r0, r27, 56, 4
dep r25 = r19, r25, 56, 4
;;
+ st8 [r16] = r22
+ st8 [r28] = r29
st8 [r18] = r25
st8 [r17] = r27
;;
extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
void
-physical_tlb_miss(VCPU *vcpu, u64 vadr)
+physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
{
u64 pte;
ia64_rr rr;
pte = pte | PHY_PAGE_UC;
else
pte = pte | PHY_PAGE_WB;
- thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr);
+ thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
return;
}
return IA64_FAULT;
}
}
- physical_tlb_miss(v, vadr);
+ physical_tlb_miss(v, vadr, type);
return IA64_FAULT;
}
return IA64_FAULT;
}
}
- thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);
+ thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
}else if(type == DSIDE_TLB){
return;
}
-void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va)
+void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
{
- u64 phy_pte;
+ u64 phy_pte, psr;
+ ia64_rr mrr;
+
+ mrr.rrval = ia64_get_rr(va);
phy_pte=translate_phy_pte(v, &pte, itir, va);
- vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
+
+ if (itir_ps(itir) >= mrr.ps) {
+ vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
+ } else {
+ phy_pte &= ~PAGE_FLAGS_RV_MASK;
+ psr = ia64_clear_ic();
+ ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
+ ia64_set_psr(psr);
+ ia64_srlz_i();
+ }
}
/*
* vhpt lookup
thash_data_t * vhpt_lookup(u64 va)
{
thash_data_t *hash, *head;
- u64 tag, pte;
+ u64 tag, pte, itir;
head = (thash_data_t *)ia64_thash(va);
hash=head;
tag = ia64_ttag(va);
tag = hash->etag;
hash->etag = head->etag;
head->etag = tag;
+ itir = hash->itir;
+ hash->itir = head->itir;
+ head->itir = itir;
head->len = hash->len;
hash->len=0;
return head;
if (data == NULL) {
data = vtlb_lookup(current, iha, DSIDE_TLB);
if (data != NULL)
- thash_vhpt_insert(current, data->page_flags, data->itir ,iha);
+ thash_vhpt_insert(current, data->page_flags, data->itir,
+ iha, DSIDE_TLB);
}
asm volatile ("rsm psr.ic|psr.i;;"
head=hcb->hash;
num = (hcb->hash_sz/sizeof(thash_data_t));
do{
- head->itir = PAGE_SHIFT<<2;
+ head->page_flags = 0;
+ head->itir = 0;
head->etag = 1UL<<63;
head->next = 0;
head++;
hcb->cch_freelist = p = hcb->cch_buf;
num = (hcb->cch_sz/sizeof(thash_data_t))-1;
do{
- p->itir = PAGE_SHIFT<<2;
+ p->page_flags = 0;
+ p->itir = 0;
p->next =p+1;
p++;
num--;
}while(num);
- p->itir = PAGE_SHIFT<<2;
+ p->itir = 0;
p->next = NULL;
}
#define __gpa_to_mpa(_d, gpa) \
((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
+#define __mpa_to_gpa(madr) \
+ ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \
+ ((madr) & ~PAGE_MASK))
+
/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
extern void vtlb_insert(struct vcpu *vcpu, u64 pte, u64 itir, u64 va);
extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
-extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa);
+extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
+ int type);
extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
extern void recover_if_physical_mode(VCPU *vcpu);
extern void vmx_init_all_rr(VCPU *vcpu);
extern void vmx_load_all_rr(VCPU *vcpu);
-extern void physical_tlb_miss(VCPU *vcpu, u64 vadr);
+extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, int type);
/*
* No sanity check here, since all psr changes have been
* checked in switch_mm_mode().